/* Ensure real hardware interrupts are enabled. */
ed->arch.guest_context.user_regs.eflags |= EF_IE;
+ } else {
+ __vmwrite(GUEST_EFLAGS, ed->arch.guest_context.user_regs.eflags);
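+ /* Trap #DB while the guest single-steps (EFLAGS.TF set); otherwise
+  * leave debug exceptions to the guest. */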
+ if (ed->arch.guest_context.user_regs.eflags & EF_TF)
+ __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
+ else
+ __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
}
if ( test_bit(_VCPUF_initialised, &ed->vcpu_flags) )
cr0 &= ~X86_CR0_TS;
__vmwrite(GUEST_CR0, cr0);
}
- __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP);
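+ /* Clear only the #NM (device-not-available) intercept rather than
+  * reloading the default exception bitmap. */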
+ __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
}
static void vmx_do_general_protection_fault(struct cpu_user_regs *regs)
save_cpu_user_regs(&regs);
pdb_handle_exception(3, &regs, 1);
restore_cpu_user_regs(&regs);
+ break;
+ }
+#else
+ case TRAP_debug:
+ {
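+ /* Local prototypes: store_cpu_user_regs() is defined later in this
+  * file; do_sched_op() is the scheduler hypercall handler. */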
+ void store_cpu_user_regs(struct cpu_user_regs *regs);
+ long do_sched_op(unsigned long op);
+
+ store_cpu_user_regs(&regs);
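+ /* Clear the latched single-step event so it is not redelivered when
+  * the guest resumes. */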
+ __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS, PENDING_DEBUG_EXC_BS);
+
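+ /* Pause the domain and yield the CPU so an external debugger can
+  * attach and inspect it. */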
+ set_bit(_VCPUF_ctrl_pause, &current->vcpu_flags);
+ do_sched_op(SCHEDOP_yield);
+
break;
}
#endif
return 0;
}
#elif defined (__i386__)
-static void store_cpu_user_regs(struct cpu_user_regs *regs)
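+/* No longer static: the TRAP_debug handler above declares and calls it. */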
+void store_cpu_user_regs(struct cpu_user_regs *regs)
{
__vmread(GUEST_SS_SELECTOR, &regs->ss);
__vmread(GUEST_ESP, &regs->esp);
#define EXCEPTION_BITMAP_MC (1 << 18) /* Machine Check */
#define EXCEPTION_BITMAP_XF (1 << 19) /* SIMD Floating-Point Exception */
+/* Pending debug exceptions (GUEST_PENDING_DBG_EXCEPTIONS field) */
+
+#define PENDING_DEBUG_EXC_BP (1 << 12) /* Breakpoint */
+#define PENDING_DEBUG_EXC_BS (1 << 14) /* Single step */
+
#ifdef XEN_DEBUGGER
#define MONITOR_DEFAULT_EXCEPTION_BITMAP \
( EXCEPTION_BITMAP_PG | \
return 0;
}
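+
+/*
+ * Read-modify-write helpers for VMCS fields: set or clear the bits in
+ * mask, propagating any VMREAD/VMWRITE failure in the return value.
+ */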
+static inline int __vm_set_bit(unsigned long field, unsigned long mask)
+{
+ unsigned long tmp;
+ int err = 0;
+
+ err |= __vmread(field, &tmp);
+ tmp |= mask;
+ err |= __vmwrite(field, tmp);
+
+ return err;
+}
+
+static inline int __vm_clear_bit(unsigned long field, unsigned long mask)
+{
+ unsigned long tmp;
+ int err = 0;
+
+ err |= __vmread(field, &tmp);
+ tmp &= ~mask;
+ err |= __vmwrite(field, tmp);
+
+ return err;
+}
+
static inline void __vmxoff (void)
{
__asm__ __volatile__ ( VMXOFF_OPCODE
__vmread(CR0_READ_SHADOW, &cr0);
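+ /* Guest still sees TS as clear: intercept #NM to catch its next FPU
+  * access. */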
if (!(cr0 & X86_CR0_TS))
- __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP |
- EXCEPTION_BITMAP_NM);
+ __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
}
#endif /* __ASM_X86_VMX_H__ */